This workbook performs the basic data exploration of the dataset.
# Configuration constants for the data exploration.
# Categorical variables with at least this many levels are excluded from the plots.
dataexp_level_exclusion_threshold <- 100
# Maximum number of factor levels shown before rare levels are lumped into "Other".
dataexp_cat_level_count <- 40
# Number of bins used for every histogram in this workbook.
dataexp_hist_bins_count <- 50
First we load the dataset.
# Load the French MTPL policy and claim datasets (from the CASdatasets package)
# and join them into a single raw table with one row per claim.
data(freMTPLfreq)
data(freMTPLsev)

# as_tibble() replaces the deprecated as_data_frame().
policy_data_tbl <- freMTPLfreq %>% as_tibble
claim_data_tbl  <- freMTPLsev  %>% as_tibble

# The inner join keeps only policies that have at least one claim; ClaimNb is
# redundant once each claim is its own row.
rawdata_tbl <- claim_data_tbl %>%
    inner_join(policy_data_tbl, by = 'PolicyID') %>%
    select(-ClaimNb)

glimpse(rawdata_tbl)
## Observations: 16,181
## Variables: 10
## $ PolicyID <int> 63987, 310037, 314463, 318713, 309380, 309380, 318738, 305914, 31...
## $ ClaimAmount <int> 1172, 1905, 1150, 1220, 55077, 7593, 1176, 1202, 1203, 1232, 1224...
## $ Exposure <dbl> 0.95, 0.71, 0.86, 0.78, 0.21, 0.21, 0.78, 0.28, 0.43, 0.95, 0.75,...
## $ Power <fctr> d, d, l, o, h, h, g, j, d, d, o, f, h, i, j, f, k, g, g, e, f, f...
## $ CarAge <int> 1, 2, 0, 10, 2, 2, 2, 4, 1, 1, 10, 3, 2, 0, 2, 4, 9, 2, 7, 4, 1, ...
## $ DriverAge <int> 49, 32, 37, 48, 53, 53, 29, 64, 51, 50, 57, 48, 63, 37, 33, 35, 3...
## $ Brand <fctr> Japanese (except Nissan) or Korean, Japanese (except Nissan) or ...
## $ Gas <fctr> Regular, Regular, Diesel, Regular, Diesel, Diesel, Diesel, Diese...
## $ Region <fctr> R11, R31, R11, R11, R11, R11, R74, R11, R31, R54, R72, R11, R53,...
## $ Density <int> 2354, 570, 27000, 2400, 15725, 15725, 45, 5859, 518, 2694, 127, 3...
### _TEMPLATE_
### Do simple datatype transforms and save output in data_tbl
# Copy the raw data and normalise the column names to snake_case.
data_tbl <- rawdata_tbl
names(data_tbl) <- clean_names(names(data_tbl))
glimpse(data_tbl)
## Observations: 16,181
## Variables: 10
## $ policy_id <int> 63987, 310037, 314463, 318713, 309380, 309380, 318738, 305914, 3...
## $ claim_amount <int> 1172, 1905, 1150, 1220, 55077, 7593, 1176, 1202, 1203, 1232, 122...
## $ exposure <dbl> 0.95, 0.71, 0.86, 0.78, 0.21, 0.21, 0.78, 0.28, 0.43, 0.95, 0.75...
## $ power <fctr> d, d, l, o, h, h, g, j, d, d, o, f, h, i, j, f, k, g, g, e, f, ...
## $ car_age <int> 1, 2, 0, 10, 2, 2, 2, 4, 1, 1, 10, 3, 2, 0, 2, 4, 9, 2, 7, 4, 1,...
## $ driver_age <int> 49, 32, 37, 48, 53, 53, 29, 64, 51, 50, 57, 48, 63, 37, 33, 35, ...
## $ brand <fctr> Japanese (except Nissan) or Korean, Japanese (except Nissan) or...
## $ gas <fctr> Regular, Regular, Diesel, Regular, Diesel, Diesel, Diesel, Dies...
## $ region <fctr> R11, R31, R11, R11, R11, R11, R74, R11, R31, R54, R72, R11, R53...
## $ density <int> 2354, 570, 27000, 2400, 15725, 15725, 45, 5859, 518, 2694, 127, ...
We now create derived features useful for modelling. These values are new variables calculated from existing variables in the data.
# Derive categorical versions of the numeric rating factors, and convert the
# policy id to character since it is an identifier rather than a number.
data_tbl <- data_tbl %>%
    mutate(
        policy_id      = as.character(policy_id),
        cat_driver_age = cut(driver_age, breaks = c(17, 22, 26, 42, 74, Inf),
                             ordered_result = TRUE),
        cat_car_age    = cut(car_age, breaks = c(0, 1, 4, 15, Inf),
                             include.lowest = TRUE, ordered_result = TRUE),
        cat_density    = cut(density, breaks = c(0, 40, 200, 500, 4500, Inf),
                             include.lowest = TRUE, ordered_result = TRUE)
    )

glimpse(data_tbl)
## Observations: 16,181
## Variables: 13
## $ policy_id <chr> "63987", "310037", "314463", "318713", "309380", "309380", "31...
## $ claim_amount <int> 1172, 1905, 1150, 1220, 55077, 7593, 1176, 1202, 1203, 1232, 1...
## $ exposure <dbl> 0.95, 0.71, 0.86, 0.78, 0.21, 0.21, 0.78, 0.28, 0.43, 0.95, 0....
## $ power <fctr> d, d, l, o, h, h, g, j, d, d, o, f, h, i, j, f, k, g, g, e, f...
## $ car_age <int> 1, 2, 0, 10, 2, 2, 2, 4, 1, 1, 10, 3, 2, 0, 2, 4, 9, 2, 7, 4, ...
## $ driver_age <int> 49, 32, 37, 48, 53, 53, 29, 64, 51, 50, 57, 48, 63, 37, 33, 35...
## $ brand <fctr> Japanese (except Nissan) or Korean, Japanese (except Nissan) ...
## $ gas <fctr> Regular, Regular, Diesel, Regular, Diesel, Diesel, Diesel, Di...
## $ region <fctr> R11, R31, R11, R11, R11, R11, R74, R11, R31, R54, R72, R11, R...
## $ density <int> 2354, 570, 27000, 2400, 15725, 15725, 45, 5859, 518, 2694, 127...
## $ cat_driver_age <ord> (42,74], (26,42], (26,42], (42,74], (42,74], (42,74], (26,42],...
## $ cat_car_age <ord> [0,1], (1,4], [0,1], (4,15], (1,4], (1,4], (1,4], (1,4], [0,1]...
## $ cat_density <ord> (500,4.5e+03], (500,4.5e+03], (4.5e+03,Inf], (500,4.5e+03], (4...
Before we do anything with the data, we first check for missing values in the dataset. In some cases, missing data is coded by a special character rather than as a blank, so we first correct for this.
### _TEMPLATE_
### ADD CODE TO CORRECT FOR DATA ENCODING HERE
With missing data properly encoded, we now visualise the missing data in a number of different ways.
We first examine a simple univariate count of all the missing data:
row_count <- data_tbl %>% nrow

# Count and plot the proportion of missing values per column. The purrr-style
# lambda replaces the deprecated funs() helper.
missing_univariate_tbl <- data_tbl %>%
    summarise_all(~ sum(is.na(.))) %>%
    gather('variable', 'missing_count') %>%
    mutate(missing_prop = missing_count / row_count)

ggplot(missing_univariate_tbl) +
    geom_bar(aes(x = fct_reorder(variable, -missing_prop), weight = missing_prop)) +
    scale_y_continuous(labels = comma) +
    xlab("Variable") +
    ylab("Missing Value Proportion") +
    theme(axis.text.x = element_text(angle = 90))
We remove all variables where all of the entries are missing
# Variables that are missing in every row carry no information, so drop them.
# pull() replaces the `.[['variable']]` extraction idiom.
remove_vars <- missing_univariate_tbl %>%
    filter(missing_count == row_count) %>%
    pull(variable)

lessmiss_data_tbl <- data_tbl %>%
    select(-one_of(remove_vars))
With these columns removed, we repeat the exercise.
# Recompute the univariate missing-value proportions on the reduced table.
# The lambda replaces the deprecated funs() helper.
missing_univariate_tbl <- lessmiss_data_tbl %>%
    summarise_all(~ sum(is.na(.))) %>%
    gather('variable', 'missing_count') %>%
    mutate(missing_prop = missing_count / row_count)

ggplot(missing_univariate_tbl) +
    geom_bar(aes(x = fct_reorder(variable, -missing_prop), weight = missing_prop)) +
    scale_y_continuous(labels = comma) +
    xlab("Variable") +
    ylab("Missing Value Proportion") +
    theme(axis.text.x = element_text(angle = 90))
To reduce the scale of this plot, we look at the variables with the largest missing-data counts (the code below keeps up to the top 50).
# Keep only the variables with the largest missing-value counts.
# NOTE(review): the surrounding text mentions the top twenty counts but the
# code keeps the top 50 - confirm which is intended.
missing_univariate_top_tbl <- missing_univariate_tbl %>%
    arrange(desc(missing_count)) %>%
    top_n(n = 50, wt = missing_count)

ggplot(missing_univariate_top_tbl) +
    geom_bar(aes(x = fct_reorder(variable, -missing_prop), weight = missing_prop)) +
    scale_y_continuous(labels = comma) +
    labs(x = "Variable", y = "Missing Value Proportion") +
    theme(axis.text.x = element_text(angle = 90))
It is useful to get an idea of what combinations of variables tend to have variables with missing values simultaneously, so to construct a visualisation for this we create a count of all the times given combinations of variables have missing values, producing a heat map for these combination counts.
# Heat map of which variables tend to be missing simultaneously: each unique
# missingness pattern becomes one row. Lambdas replace the deprecated funs(),
# and the is.na/as.numeric steps are fused into one pass.
missing_plot_tbl <- rawdata_tbl %>%
    mutate_all(~ as.numeric(is.na(.))) %>%   # 1 = missing, 0 = present
    mutate(label = do.call(paste0, .)) %>%   # pattern id, e.g. "0010..."
    group_by(label) %>%
    summarise_all(sum) %>%
    arrange(desc(label)) %>%
    select(-label) %>%
    mutate(rowid = do.call(pmax, .)) %>%     # rows sharing this pattern
    gather('col', 'count', -rowid) %>%
    mutate(Proportion = count / row_count,
           rowid      = round(rowid / row_count, 4))

ggplot(missing_plot_tbl) +
    geom_tile(aes(x = col, y = as.factor(rowid), fill = Proportion), height = 0.8) +
    scale_fill_continuous(labels = comma) +
    scale_x_discrete(position = 'top') +
    xlab("Variable") +
    ylab("Missing Value Proportion") +
    theme(axis.text.x = element_text(angle = 90))
This visualisation takes a little explaining.
Each row represents a combination of variables with simultaneous missing values. For each row in the graphic, the coloured entries show which particular variables are missing in that combination. The proportion of rows with that combination is displayed in both the label for the row and the colouring for the cells in the row.
With the raw data loaded up we now remove obvious unique or near-unique variables that are not amenable to basic exploration and plotting.
coltype_lst <- create_coltype_list(data_tbl)

# Count the distinct levels of each discrete variable; n_distinct() replaces
# the anonymous length(unique(x)) helper (both count NA as a level).
catvar_valuecount_tbl <- data_tbl %>%
    summarise_at(coltype_lst$split$discrete, n_distinct) %>%
    gather('var_name', 'level_count') %>%
    arrange(desc(level_count))

print(catvar_valuecount_tbl)
## # A tibble: 8 x 2
## var_name level_count
## <chr> <int>
## 1 policy_id 15390
## 2 power 12
## 3 region 10
## 4 brand 7
## 5 cat_driver_age 5
## 6 cat_density 5
## 7 cat_car_age 4
## 8 gas 2
# Cache the row count; it is reused below to detect unique-identifier columns.
row_count <- nrow(data_tbl)
cat("Dataset has ", row_count, " rows\n", sep = "")
## Dataset has 16181 rows
Now that we have a table of the counts of all the categorical variables we can automatically exclude unique variables from the exploration, as the level count will match the row count.
# Variables whose level count equals the row count are unique identifiers and
# add nothing to the exploration. pull() replaces the `.[["var_name"]]` idiom.
unique_vars <- catvar_valuecount_tbl %>%
    filter(level_count == row_count) %>%
    pull(var_name)

print(unique_vars)
## character(0)

explore_data_tbl <- data_tbl %>%
    select(-one_of(unique_vars))
Having removed the unique identifier variables from the dataset, we may also wish to exclude categoricals with high level counts also, so we create a vector of those variable names.
# High-cardinality categoricals (but not fully unique ones) are candidates for
# exclusion from the plots. pull() replaces the `.[["var_name"]]` idiom.
highcount_vars <- catvar_valuecount_tbl %>%
    filter(level_count >= dataexp_level_exclusion_threshold,
           level_count < row_count) %>%
    pull(var_name)

cat(paste0(highcount_vars, collapse = ', '))
## policy_id
We now can continue doing some basic exploration of the data. We may also choose to remove some extra columns from the dataset.
### You may want to comment out these next few lines to customise which
### categoricals are kept in the exploration.
drop_vars <- c(highcount_vars)

if (length(drop_vars) > 0) {
    explore_data_tbl <- explore_data_tbl %>% select(-one_of(drop_vars))
    cat(paste0(drop_vars, collapse = ', '))
}
## policy_id

# Persist the transformed dataset for the downstream workbooks.
write_feather(data_tbl, path = 'data/claim_data.feather')
Now that we have loaded the data we can prepare it for some basic data exploration. We first exclude the variables that are unique identifiers or similar, and then split the remaining variables out into various categories to help with the systematic data exploration.
# Partition the exploration variables into continuous/discrete/logical/datetime
# groups (create_coltype_list() is a project helper defined elsewhere).
coltype_lst <- create_coltype_list(explore_data_tbl)
print(coltype_lst)
## $split
## $split$continuous
## [1] "claim_amount" "exposure" "car_age" "driver_age" "density"
##
## $split$discrete
## [1] "power" "brand" "gas" "region" "cat_driver_age"
## [6] "cat_car_age" "cat_density"
##
##
## $columns
## claim_amount exposure power car_age driver_age brand
## "continuous" "continuous" "discrete" "continuous" "continuous" "discrete"
## gas region density cat_driver_age cat_car_age cat_density
## "discrete" "discrete" "continuous" "discrete" "discrete" "discrete"
Logical variables only take two values: TRUE or FALSE. It is useful to see missing data as well though, so we also plot the count of those.
logical_vars <- coltype_lst$split$logical

# Barplot of TRUE/FALSE counts for each logical variable, with the number of
# missing values reported in the title.
for (plot_varname in logical_vars) {
    cat("--\n")
    cat(paste0(plot_varname, '\n'))

    na_count <- explore_data_tbl %>% .[[plot_varname]] %>% is.na %>% sum

    # FIX: aes_() is not the quoting helper for character input - it can map
    # the string itself rather than the named column. aes_string(), used for
    # every other plot in this file, maps the column as intended.
    explore_plot <- ggplot(explore_data_tbl) +
        geom_bar(aes_string(x = plot_varname)) +
        xlab(plot_varname) +
        ylab("Count") +
        scale_y_continuous(labels = comma) +
        ggtitle(paste0('Barplot of Counts for Variable: ', plot_varname,
                       ' (', na_count, ' missing values)')) +
        theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

    plot(explore_plot)
}
Numeric variables are usually continuous in nature, though we also have integer data.
numeric_vars <- coltype_lst$split$continuous

# Histogram of each numeric variable; vertical lines mark the mean (red) and
# the median (green), and the title reports the missing-value count.
for (plot_varname in numeric_vars) {
    cat("--\n")
    cat(paste0(plot_varname, '\n'))

    plot_var <- explore_data_tbl %>% .[[plot_varname]]
    na_count <- sum(is.na(plot_var))

    print(summary(plot_var))

    explore_plot <- ggplot(explore_data_tbl) +
        geom_histogram(aes_string(x = plot_varname), bins = dataexp_hist_bins_count) +
        geom_vline(xintercept = mean(plot_var, na.rm = TRUE),
                   colour = 'red', size = 1.5) +
        geom_vline(xintercept = median(plot_var, na.rm = TRUE),
                   colour = 'green', size = 1.5) +
        xlab(plot_varname) +
        ylab("Count") +
        scale_x_continuous(labels = comma) +
        scale_y_continuous(labels = comma) +
        ggtitle(paste0('Histogram Plot for Variable: ', plot_varname,
                       ' (', na_count, ' missing values)'),
                subtitle = '(red line is mean, green line is median)')

    print(explore_plot)
}
## --
## claim_amount
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 2 698 1156 2130 1243 2036833
## --
## exposure
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00274 0.46000 0.80000 0.70747 1.00000 1.50000
## --
## car_age
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00 3.00 7.00 7.67 11.00 99.00
## --
## driver_age
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 18.0 33.0 44.0 44.9 54.0 99.0
## --
## density
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 2 84 388 2114 1466 27000
Categorical variables only have values from a limited, and usually fixed, number of possible values
categorical_vars <- coltype_lst$split$discrete

# Barplot of level counts for each categorical variable; levels beyond
# dataexp_cat_level_count are lumped into "Other" before counting.
for (plot_varname in categorical_vars) {
    cat("--\n")
    cat(paste0(plot_varname, '\n'))

    na_count <- explore_data_tbl[[plot_varname]] %>% is.na %>% sum

    plot_tbl <- explore_data_tbl[[plot_varname]] %>%
        as.character %>%
        fct_lump(n = dataexp_cat_level_count) %>%
        fct_count

    explore_plot <- ggplot(plot_tbl) +
        geom_bar(aes(x = fct_reorder(f, -n), weight = n)) +
        xlab(plot_varname) +
        ylab("Count") +
        scale_y_continuous(labels = comma) +
        ggtitle(paste0('Barplot of Counts for Variable: ', plot_varname,
                       ' (', na_count, ' missing values)')) +
        theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

    plot(explore_plot)
}
## --
## power
## --
## brand
## --
## gas
## --
## region
## --
## cat_driver_age
## --
## cat_car_age
## --
## cat_density
Date/Time variables represent calendar or time-based data such as the time of the day, a date, or a timestamp.
datetime_vars <- coltype_lst$split$datetime

# Histogram of each date/time variable, with the missing count in the title.
for (plot_varname in datetime_vars) {
    cat("--\n")
    cat(paste0(plot_varname, '\n'))

    plot_var <- explore_data_tbl[[plot_varname]]
    na_count <- sum(is.na(plot_var))

    print(summary(plot_var))

    explore_plot <- ggplot(explore_data_tbl) +
        geom_histogram(aes_string(x = plot_varname), bins = dataexp_hist_bins_count) +
        xlab(plot_varname) +
        ylab("Count") +
        scale_y_continuous(labels = comma) +
        ggtitle(paste0('Barplot of Dates/Times in Variable: ', plot_varname,
                       ' (', na_count, ' missing values)'))

    plot(explore_plot)
}
We look at the log-log plot of claim size against the cumulative number of claims of at least the size to investigate if the claim frequency obeys a power law.
# Count the claims whose amount is at least 10^claimsize.
#
# claimsize     - log10 of the claim-size threshold
# claimdata_tbl - table with a claim_amount column
#
# Returns an integer count. NA claim amounts are ignored, matching the
# behaviour of the original filter()-then-nrow() implementation (filter drops
# NA comparisons), while avoiding materialising a filtered copy of the table
# and removing the dplyr dependency.
claimsize_count <- function(claimsize, claimdata_tbl) {
    sum(claimdata_tbl$claim_amount >= 10^claimsize, na.rm = TRUE)
}
logsize_seq <- seq(0, 7, by = 0.1)

# Cumulative claim counts at each log10 size threshold.
# tibble() replaces the deprecated data_frame() constructor.
powerlaw_tbl <- tibble(
    logsize = logsize_seq,
    count   = map_int(logsize_seq, claimsize_count, claimdata_tbl = data_tbl)
)

ggplot(powerlaw_tbl) +
    geom_line(aes(x = logsize, y = log(count))) +
    xlab('Log of Claim Size') +
    ylab('Log of Claim Count') +
    ggtitle("Power-law Scaling of Claim Sizes")
For claims above 1,000 or so (\(\log \text{Claim} = 3\)) a straight line fit may provide a reasonable fit.
# Restrict to claims above 10^3 and overlay a linear fit on the log-log curve.
ggplot(powerlaw_tbl %>% filter(logsize >= 3)) +
    geom_line(aes(x = logsize, y = log(count))) +
    geom_smooth(aes(x = logsize, y = log(count)), method = 'lm', se = TRUE) +
    labs(x = 'Log of Claim Size', y = 'Log of Claim Count') +
    ggtitle("Fitted Scaling for Large Claim Sizes")
Encouraged by the above plots, we model the higher part of the claim distribution with a power law - probably to work on the likelihood of larger claims.
We now move on to looking at bivariate plots of the data set.
Pairs plots are a very useful way of getting a quick idea of the relationships between variables in a data set.
Unfortunately, they do not scale well. Too many rows (say more than 5,000) can slow down the rendering, and more than 10 columns can make the plots uninterpretable as each cell is too small.
The technique is useful, so to circumvent these issues we sample the dataset. We select random columns and rows, and make a pairs plot of the subset, repeating this process for a number of iterations.
# Sampling parameters for the pairs plots: number of repeats, and the column
# and row counts of each random subset.
dataexp_pairsplot_itercount <- 3
dataexp_pairsplot_colcount <- 5
dataexp_pairsplot_rowcount <- 5000

if (ncol(data_tbl) > dataexp_pairsplot_colcount ||
    nrow(data_tbl) > dataexp_pairsplot_rowcount) {

    ### Ugly hack to work around current dplyr bug for mutate_if
    if (any(sapply(explore_data_tbl, is.logical))) {
        conv_tbl <- explore_data_tbl %>%
            mutate_if(is.logical, as.factor)
    } else {
        conv_tbl <- explore_data_tbl
    }

    # Lump high-cardinality character/factor columns to at most 10 levels so
    # the pairs-plot panels stay readable.
    conv_tbl <- conv_tbl %>%
        mutate_if(function(x) (is.character(x) || is.factor(x)) && !all(is.na(x)),
                  function(x) fct_lump(x, n = 9))

    # seq_len() is safe if the iteration count is ever zero (1:n is not).
    for (i in seq_len(dataexp_pairsplot_itercount)) {
        cat("--\n")
        cat(paste0("Pairs plot iter: ", i, "\n"))

        pairs_tbl <- conv_tbl %>%
            create_ggpairs_tbl(sample_cols = dataexp_pairsplot_colcount,
                               sample_rows = dataexp_pairsplot_rowcount)

        cat(paste0("Columns: ", paste0(names(pairs_tbl), collapse = ', '), "\n"))

        pairs_tbl %>%
            ggpairs(cardinality_threshold = NULL,
                    lower = list(combo = wrap('facethist', bins = 25))) %>%
            print
    }
} else {
    # NOTE(review): the size check and this fallback use data_tbl while the
    # sampled branch plots explore_data_tbl - confirm which table is intended.
    ggpairs(data_tbl) %>% print
}
## --
## Pairs plot iter: 1
## Columns: power, brand, region, cat_car_age, cat_density
## --
## Pairs plot iter: 2
## Columns: claim_amount, power, gas, region, cat_driver_age
## --
## Pairs plot iter: 3
## Columns: exposure, driver_age, brand, density, cat_driver_age
We want to look at how the variables split on the logical variables as this is a very natural way to observe the data.
### _TEMPLATE_
# Variable on which all of the bivariate plots below are faceted, and the
# maximum number of facet panels to consider.
facet_varname <- 'gas'
facet_count_max <- 3
# Builds e.g. "~ as.factor(gas)" for facet_wrap().
facet_formula <- formula(paste0("~ as.factor(", facet_varname, ")"))
For logical variables we facet on barplots of the levels, comparing TRUE, FALSE and missing data.
# Do not facet a variable on itself.
logical_vars <- logical_vars[!logical_vars %in% facet_varname]

# Faceted barplots of each remaining logical variable, with missing values of
# the plotted variable removed first. filter() with the .data pronoun replaces
# the deprecated filter_() interface, and the unused facet_count calculation
# has been dropped.
for (plot_varname in logical_vars) {
    cat("--\n")
    cat(paste0(plot_varname, '\n'))

    plot_tbl <- data_tbl %>% filter(!is.na(.data[[plot_varname]]))

    explore_plot <- ggplot(plot_tbl) +
        geom_bar(aes_string(x = plot_varname)) +
        facet_wrap(facet_formula, scales = 'free') +
        xlab(plot_varname) +
        ylab("Count") +
        scale_y_continuous(labels = comma) +
        ggtitle(paste0(facet_varname, '-Faceted Barplots for Variable: ', plot_varname)) +
        theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

    plot(explore_plot)
}
For numeric variables, we facet on histograms of the data.
# Faceted histograms of each numeric variable, missing values removed.
# filter() with the .data pronoun replaces the deprecated filter_() interface,
# and the unused facet_count calculation has been dropped.
for (plot_varname in numeric_vars) {
    cat("--\n")
    cat(paste0(plot_varname, '\n'))

    plot_tbl <- data_tbl %>% filter(!is.na(.data[[plot_varname]]))

    explore_plot <- ggplot(plot_tbl) +
        geom_histogram(aes_string(x = plot_varname), bins = dataexp_hist_bins_count) +
        facet_wrap(facet_formula, scales = 'free') +
        xlab(plot_varname) +
        ylab("Count") +
        scale_x_continuous(labels = comma) +
        scale_y_continuous(labels = comma) +
        ggtitle(paste0(facet_varname, '-Faceted Histogram for Variable: ', plot_varname)) +
        theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

    print(explore_plot)
}
## --
## claim_amount
## --
## exposure
## --
## car_age
## --
## driver_age
## --
## density
We treat categorical variables like logical variables, faceting the barplots of the different levels of the data.
# Do not facet a variable on itself.
categorical_vars <- categorical_vars[!categorical_vars %in% facet_varname]

# Faceted barplots of each categorical variable, missing values removed.
# filter() with the .data pronoun replaces the deprecated filter_() interface,
# and the unused facet_count calculation has been dropped.
for (plot_varname in categorical_vars) {
    cat("--\n")
    cat(paste0(plot_varname, '\n'))

    plot_tbl <- data_tbl %>% filter(!is.na(.data[[plot_varname]]))

    explore_plot <- ggplot(plot_tbl) +
        geom_bar(aes_string(x = plot_varname)) +
        facet_wrap(facet_formula, scales = 'free') +
        xlab(plot_varname) +
        ylab("Count") +
        scale_y_continuous(labels = comma) +
        # Title corrected: these are barplots, not histograms.
        ggtitle(paste0(facet_varname, '-Faceted Barplots for Variable: ', plot_varname)) +
        theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

    plot(explore_plot)
}
## --
## power
## --
## brand
## --
## region
## --
## cat_driver_age
## --
## cat_car_age
## --
## cat_density
Like the univariate plots, we facet on histograms of the years in the dates.
# Faceted barplots of the calendar year of each date/time variable.
# filter()/mutate() with the .data pronoun replace the deprecated
# filter_()/mutate_() SE variants; the unused facet_count is dropped.
for (plot_varname in datetime_vars) {
    cat("--\n")
    cat(paste0(plot_varname, '\n'))

    plot_tbl <- data_tbl %>%
        filter(!is.na(.data[[plot_varname]])) %>%
        mutate(plot_var = year(.data[[plot_varname]]))

    explore_plot <- ggplot(plot_tbl) +
        geom_bar(aes(x = plot_var)) +
        facet_wrap(facet_formula, scales = 'free') +
        xlab(plot_varname) +
        ylab("Count") +
        scale_y_continuous(labels = comma) +
        ggtitle(paste0(facet_varname, '-Faceted Histogram for Variable: ', plot_varname))

    plot(explore_plot)
}
Having looked at the pairs plots we also look at multivariate plots of all the data. We do this using techniques known as ‘multidimensional scaling’ or MDS.
Many of these techniques do not scale well beyond a few thousand data points, so we repeat our sampling trick as before and create multiple plots from these samples.
numeric_vars <- create_coltype_list(explore_data_tbl)$split$continuous
We start with classic multidimensional scaling, also called ‘principal coordinates analysis’, which is done in R via the function cmdscale.
# Number of MDS samples to plot and the number of rows drawn per sample.
mds_iter_count <- 4
mds_sample_count <- 2000
# Flag the rows with no missing values in any numeric variable.
row_ids <- data_tbl %>%
select(one_of(numeric_vars)) %>%
complete.cases
### _TEMPLATE_
### Choosing the first variable in the categorical list by default. You probably
### want to change that.
colour_var <- 'power'
# Keep the numeric variables plus the colouring variable, complete rows only.
input_tbl <- data_tbl %>%
select(one_of(c(numeric_vars, colour_var))) %>%
filter(row_ids)
# Build a 2-D classical MDS (principal coordinates) scatter plot of the
# numeric variables in mds_tbl, coloured by the global colour_var.
construct_mds_plot <- function(mds_tbl) {
    dist_mat <- mds_tbl %>%
        select(one_of(numeric_vars)) %>%
        dist

    mds_fit <- cmdscale(dist_mat, k = 2, eig = TRUE)

    plot_tbl <- mds_tbl %>%
        mutate(mds_d1 = mds_fit$points[, 1],
               mds_d2 = mds_fit$points[, 2])

    ggplot(plot_tbl) +
        geom_point(aes_string(x = 'mds_d1', y = 'mds_d2', colour = colour_var)) +
        xlab("MDS Dim 1") +
        ylab("MDS Dim 2")
}
mds_lst <- create_sampled_output(input_tbl, construct_mds_plot, mds_sample_count, mds_iter_count)

# Print each sampled MDS plot. seq_along() is safe for empty lists, unlike
# the original 1:length() idiom.
for (i in seq_along(mds_lst)) {
    cat("--\n")
    cat(paste0("MDS plot iter: ", i, "\n"))
    print(mds_lst[[i]])
}
## --
## MDS plot iter: 1
## --
## MDS plot iter: 2
## --
## MDS plot iter: 3
## --
## MDS plot iter: 4
One standard method for doing this is t-SNE, t-distributed Stochastic Neighbourhood Embedding. This algorithm is a type of dimensionality reduction - it constructs a lower-dimensional set of data from the original dataset by attempting to minimise the Kullback–Leibler divergence between the original and target datasets.
t-SNE requires unique datapoints, so to ensure we do not pass repeated rows at any point, we may add a small amount of noise to the numeric columns to ensure uniqueness - t-SNE is a probabilistic process so this should not affect our output very much.
As with previous methods, we take samples from larger datasets and plot outputs from multiple samples.
# Number of t-SNE samples to plot and the number of rows drawn per sample.
tsne_iter_count <- 4
tsne_sample_count <- 5000
# Flag the rows with no missing values in any numeric variable.
row_ids <- data_tbl %>%
select(one_of(numeric_vars)) %>%
complete.cases
### _TEMPLATE_
### Choosing the first variable in the categorical list by default. You probably
### want to change that.
colour_var <- 'power'
# Jitter the numeric columns so rows are unique, as t-SNE requires.
# NOTE(review): row_ids was computed from the unjittered data - fine provided
# jitter_numeric_variables introduces no new NAs; confirm against the helper.
input_tbl <- data_tbl %>%
select(one_of(c(numeric_vars, colour_var))) %>%
jitter_numeric_variables %>%
filter(row_ids)
# Run t-SNE on the numeric columns of tsne_tbl and return a scatter plot of
# the two embedding dimensions, coloured by the global colour_var.
construct_tsne_plot <- function(tsne_tbl) {
    embedding <- Rtsne(tsne_tbl %>% select(one_of(numeric_vars)), theta = 0.9)

    plot_tbl <- tsne_tbl %>%
        mutate(tsne_d1 = embedding$Y[, 1],
               tsne_d2 = embedding$Y[, 2])

    ggplot(plot_tbl) +
        geom_point(aes_string(x = 'tsne_d1', y = 'tsne_d2', colour = colour_var),
                   size = 0.5) +
        xlab("t-SNE Dim 1") +
        ylab("t-SNE Dim 2")
}
tsne_lst <- create_sampled_output(input_tbl, construct_tsne_plot, tsne_sample_count, tsne_iter_count)

# Print each sampled t-SNE plot. seq_along() is safe for empty lists, unlike
# the original 1:length() idiom.
for (i in seq_along(tsne_lst)) {
    cat("--\n")
    cat(paste0("t-SNE plot iter: ", i, "\n"))
    print(tsne_lst[[i]])
}
## --
## t-SNE plot iter: 1
## --
## t-SNE plot iter: 2
## --
## t-SNE plot iter: 3
## --
## t-SNE plot iter: 4
Another important part of data exploration is the identification of possible outliers, and we approach this in multiple ways.
In keeping with the methodical approach we start with a univariate perspective, looking at each numerical variable by itself and plotting the values in the variable both with and without the outliers.
# For each numeric variable, show a histogram of all values beside a histogram
# with the identified outliers removed, to see their effect on the shape.
for(plot_varname in numeric_vars) {
cat("--\n")
cat(paste0(plot_varname, '\n'))
plot_var <- data_tbl %>% .[[plot_varname]]
# identify_univariate_outliers() is a project helper defined elsewhere.
outlier_point <- identify_univariate_outliers(plot_var)
# NOTE(review): this subset *keeps* the flagged points. If the helper returns
# TRUE for outliers, this should be plot_var[!outlier_point] - confirm the
# helper's return convention.
no_outlier_vals <- plot_var[outlier_point]
all_plot <- ggplot() +
geom_histogram(aes(x = plot_var), bins = dataexp_hist_bins_count) +
xlab(plot_varname) +
ylab("Count") +
scale_x_continuous(labels = comma) +
scale_y_continuous(labels = comma) +
ggtitle("All Data") +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
no_outlier_plot <- ggplot() +
geom_histogram(aes(x = no_outlier_vals), bins = dataexp_hist_bins_count) +
xlab(plot_varname) +
ylab("Count") +
scale_x_continuous(labels = comma) +
scale_y_continuous(labels = comma) +
ggtitle("No Outliers") +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
# Render the two histograms side by side (plot_grid is from cowplot).
plot_grid(all_plot, no_outlier_plot, ncol = 2) %>% print
}
## --
## claim_amount
## --
## exposure
## --
## car_age
## --
## driver_age
## --
## density
We use the above plots to decide if we need to remove certain extreme values from the dataset.
# We place basic logic for identifying univariate outliers here
#data_filt_tbl <- data_tbl %>% mutate(uni_outlier = ifelse(val > 100))
Sometimes outliers are so not because of individual values, but because the particular combination of values is unusual and so may have undue influence on the analysis.
Our univariate approach will not discover these outliers; we need to consider multiple dimensions at once. In analogy to the univariate analysis, there are multiple possible approaches we might take, but we start with a standard and common approach: we calculate the Mahalanobis distance for each data point and then look to label any data points with very high distances as being ‘outliers’.
The Mahalanobis distance, \(D_M\), can be thought of as the multivariate equivalent of ‘number of standard deviations away from the mean’. If we consider the data to be a set of multivariate values with mean vector \(\mathbf{\mu}\) and covariance \(\Sigma\), the Mahalanobis distance for any individual datapoint \(\mathbf{x}\) is calculated as follows:
\[ D_M(\mathbf{x}) = \sqrt{(\mathbf{x} - \mathbf{\mu})^T \, \Sigma^{-1} \, (\mathbf{x} - \mathbf{\mu})} \]
We think of the Mahalanobis distance as being an extension of the standard Euclidean distance where the correlation amongst variables is accounted for. Thus, data points that differ mainly along higher-correlated axes are considered ‘closer’ than datapoints different along less-correlated axes.
To perform this calculation we need estimates for \(\mu\) and \(\Sigma\). As we are assuming the presence of outliers, we use robust methods to obtain our estimates of the mean and the covariance. The covMcd() function from the robustbase package is used. For simplicity, we start with just the numerical variables, extending this approach after.
# Numeric-only view of the data, plus a flag marking fully observed rows.
num_data_tbl <- select(data_tbl, one_of(numeric_vars))
complete_flag <- complete.cases(num_data_tbl)
Now that we have a dataset of numeric variables with no missing data we calculate robust estimates for the mean and covariance of the data.
# Robust (Minimum Covariance Determinant) estimates of the centre and
# covariance, fitted on the complete rows only.
mcd_estimate <- robustbase::covMcd(num_data_tbl[complete_flag, ])
With robust estimates for both the mean and covariance, we now calculate the Mahalanobis distance for each of the datapoints.
# Mahalanobis distance for every row; rows with incomplete numeric data are
# left as NA.
m_dist <- rep(NA_real_, row_count)
m_dist[complete_flag] <- sqrt(
    mahalanobis(num_data_tbl[complete_flag, ],
                center = mcd_estimate$center,
                cov    = mcd_estimate$cov)
)

data_tbl <- data_tbl %>%
    mutate(m_dist = m_dist)
We have calculated the Mahalanobis distance and appended it to the data, so we look at a cumulative plot of these distances to see if any are so far removed from the data we may consider labelling them as outliers.
cutoff_percentile <- 0.99

# Percentile plot of the Mahalanobis distances; the red line marks the
# percentile beyond which points are labelled outliers.
ggplot(data_tbl %>% filter(!is.na(m_dist))) +
    geom_line(aes(x = seq_along(m_dist) / length(m_dist), y = sort(m_dist))) +
    # The intercept is a constant, so it belongs outside aes() - wrapping a
    # constant in aes() is an anti-pattern (it creates a spurious mapping).
    geom_vline(xintercept = cutoff_percentile, colour = 'red') +
    xlab("Quantile Percentage") +
    ylab("Mahalanobis Distance") +
    ggtitle("Percentile Plot of Mahalanobis Distance")

cutoff_distance <- quantile(m_dist, probs = cutoff_percentile, na.rm = TRUE)

# Label points at or beyond the cutoff as outliers; rows with missing numeric
# data get NA.
data_tbl <- data_tbl %>%
    mutate(mcd_outlier = m_dist >= cutoff_distance)
Having labelled data points as outliers, we now redo this percentile plot to see how it looks
# Redo the percentile plot with the labelled outliers (and NA rows) excluded.
ggplot(data_tbl %>% filter(mcd_outlier == FALSE)) +
    geom_line(aes(x = seq_along(m_dist) / length(m_dist), y = sort(m_dist))) +
    # Constant intercept moved outside aes(), matching the earlier plot.
    geom_vline(xintercept = cutoff_percentile, colour = 'red') +
    xlab("Quantile Percentage") +
    ylab("Mahalanobis Distance") +
    ggtitle("No-Outlier Percentile Plot of Mahalanobis Distance")